Push the runqueue definitions down from the generic scheduler into the individual schedulers; BVT, FBVT and Round-Robin are updated to work with the new interface.
/* all per-domain BVT-specific scheduling info is stored here */
struct bvt_dom_info
{
- unsigned long mcu_advance; /* inverse of weight */
- u32 avt; /* actual virtual time */
- u32 evt; /* effective virtual time */
- int warpback; /* warp? */
- long warp; /* virtual time warp */
- long warpl; /* warp limit */
- long warpu; /* unwarp time requirement */
- s_time_t warped; /* time it ran warped last time */
- s_time_t uwarped; /* time it ran unwarped last time */
+ struct domain *domain; /* domain this info belongs to */
+ struct list_head run_list; /* runqueue list pointers */
+ unsigned long mcu_advance; /* inverse of weight */
+ u32 avt; /* actual virtual time */
+ u32 evt; /* effective virtual time */
+ int warpback; /* warp? */
+ long warp; /* virtual time warp */
+ long warpl; /* warp limit */
+ long warpu; /* unwarp time requirement */
+ s_time_t warped; /* time it ran warped last time */
+ s_time_t uwarped; /* time it ran unwarped last time */
};
struct bvt_cpu_info
{
- unsigned long svt; /* XXX check this is unsigned long! */
+ struct list_head runqueue; /* runqueue for given processor */
+ unsigned long svt; /* XXX check this is unsigned long! */
};
#define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
+#define RUNLIST(p) &(BVT_INFO(p)->run_list)
+#define RUNQUEUE(cpu) &(CPU_INFO(cpu)->runqueue)
#define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
#define MCU (s32)MICROSECS(100) /* Minimum unit */
void bvt_add_task(struct domain *p)
{
struct bvt_dom_info *inf = BVT_INFO(p);
-
ASSERT(inf != NULL);
ASSERT(p != NULL);
inf->mcu_advance = MCU_ADVANCE;
-
+ inf->domain = p;
+
if ( p->domain == IDLE_DOMAIN_ID )
{
inf->avt = inf->evt = ~0U;
return;
}
+int bvt_init_idle_task(struct domain *p)
+{
+ unsigned long flags;
+
+ if(bvt_alloc_task(p) < 0) return -1;
+
+ bvt_add_task(p);
+
+ spin_lock_irqsave(&schedule_lock[p->processor], flags);
+ set_bit(DF_RUNNING, &p->flags);
+ if ( !__task_on_runqueue(RUNLIST(p)) )
+ __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+
+ return 0;
+}
+
/**
* bvt_free_task - free BVT private structures for a task
* @p: task
*/
static task_slice_t bvt_do_schedule(s_time_t now)
{
- struct domain *prev = current, *next = NULL, *next_prime, *p;
+ struct domain *prev = current, *next = NULL, *next_prime, *p;
struct list_head *tmp;
int cpu = prev->processor;
s32 r_time; /* time for new dom to run */
__calc_evt(prev_inf);
- __del_from_runqueue(prev);
+ __del_from_runqueue(RUNLIST(prev));
if ( domain_runnable(prev) )
- __add_to_runqueue_tail(prev);
+ __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
}
+
/* We should at least have the idle task */
- ASSERT(!list_empty(&schedule_data[cpu].runqueue));
+ ASSERT(!list_empty(RUNQUEUE(cpu)));
/*
* scan through the run queue and pick the task with the lowest evt
* *and* the task the second lowest evt.
* this code is O(n) but we expect n to be small.
*/
- next = schedule_data[cpu].idle;
- next_prime = NULL;
+ next_inf = BVT_INFO(schedule_data[cpu].idle);
+ next_prime_inf = NULL;
next_evt = ~0U;
next_prime_evt = ~0U;
min_avt = ~0U;
- list_for_each ( tmp, &schedule_data[cpu].runqueue )
+ list_for_each ( tmp, RUNQUEUE(cpu) )
{
- p = list_entry(tmp, struct domain, run_list);
- p_inf = BVT_INFO(p);
+ p_inf = list_entry(tmp, struct bvt_dom_info, run_list);
if ( p_inf->evt < next_evt )
{
- next_prime = next;
- next_prime_evt = next_evt;
- next = p;
- next_evt = p_inf->evt;
+ next_prime_inf = next_inf;
+ next_prime_evt = next_evt;
+ next_inf = p_inf;
+ next_evt = p_inf->evt;
}
else if ( next_prime_evt == ~0U )
{
- next_prime_evt = p_inf->evt;
- next_prime = p;
+ next_prime_evt = p_inf->evt;
+ next_prime_inf = p_inf;
}
else if ( p_inf->evt < next_prime_evt )
{
- next_prime_evt = p_inf->evt;
- next_prime = p;
+ next_prime_evt = p_inf->evt;
+ next_prime_inf = p_inf;
}
/* Determine system virtual time. */
min_avt = p_inf->avt;
}
+ /* Extract the domain pointers from the dom infos */
+ next = next_inf->domain;
+ next_prime = next_prime_inf->domain;
+
/* Update system virtual time. */
if ( min_avt != ~0U )
CPU_SVT(cpu) = min_avt;
goto sched_done;
}
- next_prime_inf = BVT_INFO(next_prime);
- next_inf = BVT_INFO(next);
-
/*
* If we are here then we have two runnable tasks.
* Work out how long 'next' can run till its evt is greater than
static void bvt_dump_cpu_state(int i)
{
+ unsigned long flags;
+ struct list_head *list, *queue;
+ int loop = 0;
+ struct bvt_dom_info *d_inf;
+ struct domain *d;
+
+ spin_lock_irqsave(&schedule_lock[i], flags);
printk("svt=0x%08lX ", CPU_SVT(i));
+
+ queue = RUNQUEUE(i);
+ printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
+ (unsigned long) queue->next, (unsigned long) queue->prev);
+
+ list_for_each ( list, queue )
+ {
+ d_inf = list_entry(list, struct bvt_dom_info, run_list);
+ d = d_inf->domain;
+ printk("%3d: %u has=%c ", loop++, d->domain,
+ test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+ bvt_dump_runq_el(d);
+ printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+ printk(" l: %lx n: %lx p: %lx\n",
+ (unsigned long)list, (unsigned long)list->next,
+ (unsigned long)list->prev);
+ }
+ spin_unlock_irqrestore(&schedule_lock[i], flags);
}
+/* We use cache to create the bvt_dom_infos
+ this functions makes sure that the run_list
+ is initialised properly. The new domain needs
+ NOT to appear as to be on the runqueue */
+static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+{
+ struct bvt_dom_info *dom_inf = (struct bvt_dom_info*)arg1;
+ dom_inf->run_list.next = NULL;
+ dom_inf->run_list.prev = NULL;
+}
/* Initialise the data structures. */
int bvt_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
{
schedule_data[i].sched_priv = kmalloc(sizeof(struct bvt_cpu_info));
+ INIT_LIST_HEAD(RUNQUEUE(i));
+
if ( schedule_data[i].sched_priv == NULL )
{
printk("Failed to allocate BVT scheduler per-CPU memory!\n");
dom_info_cache = kmem_cache_create("BVT dom info",
sizeof(struct bvt_dom_info),
- 0, 0, NULL, NULL);
+ 0, 0, cache_constructor, NULL);
if ( dom_info_cache == NULL )
{
{
if ( test_bit(DF_RUNNING, &d->flags) )
cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
- else if ( __task_on_runqueue(d) )
- __del_from_runqueue(d);
+ else if ( __task_on_runqueue(RUNLIST(d)) )
+ __del_from_runqueue(RUNLIST(d));
}
void bvt_wake(struct domain *d)
int cpu = d->processor;
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(d)) )
+ if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
return;
- __add_to_runqueue_head(d);
+ __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(d->processor));
now = NOW();
.sched_id = SCHED_BVT,
.init_scheduler = bvt_init_scheduler,
+ .init_idle_task = bvt_init_idle_task,
.alloc_task = bvt_alloc_task,
.add_task = bvt_add_task,
.free_task = bvt_free_task,
.adjdom = bvt_adjdom,
.dump_settings = bvt_dump_settings,
.dump_cpu_state = bvt_dump_cpu_state,
- .dump_runq_el = bvt_dump_runq_el,
.sleep = bvt_sleep,
.wake = bvt_wake,
};
/* all per-domain BVT-specific scheduling info is stored here */
struct fbvt_dom_info
{
- unsigned long mcu_advance; /* inverse of weight */
- u32 avt; /* actual virtual time */
- u32 evt; /* effective virtual time */
- u32 time_slept; /* amount of time slept */
- int warpback; /* warp? */
- long warp; /* virtual time warp */
- long warpl; /* warp limit */
- long warpu; /* unwarp time requirement */
- s_time_t warped; /* time it ran warped last time */
- s_time_t uwarped; /* time it ran unwarped last time */
+ struct domain *domain; /* domain this info belongs to */
+ struct list_head run_list; /* runqueue pointers */
+ unsigned long mcu_advance; /* inverse of weight */
+ u32 avt; /* actual virtual time */
+ u32 evt; /* effective virtual time */
+ u32 time_slept; /* amount of time slept */
+ int warpback; /* warp? */
+ long warp; /* virtual time warp */
+ long warpl; /* warp limit */
+ long warpu; /* unwarp time requirement */
+ s_time_t warped; /* time it ran warped last time */
+ s_time_t uwarped; /* time it ran unwarped last time */
};
struct fbvt_cpu_info
{
- unsigned long svt; /* XXX check this is unsigned long! */
- u32 vtb; /* virtual time bonus */
- u32 r_time; /* last time to run */
+ struct list_head runqueue; /* runqueue for this CPU */
+ unsigned long svt; /* XXX check this is unsigned long! */
+ u32 vtb; /* virtual time bonus */
+ u32 r_time; /* last time to run */
};
#define FBVT_INFO(p) ((struct fbvt_dom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
+#define RUNLIST(p) (struct list_head *)(&(FBVT_INFO(p)->run_list))
+#define RUNQUEUE(cpu) (struct list_head *)&(CPU_INFO(cpu)->runqueue)
#define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
#define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
#define R_TIME(cpu) (CPU_INFO(cpu)->r_time)
ASSERT(p != NULL);
inf->mcu_advance = MCU_ADVANCE;
+ inf->domain = p;
if ( p->domain == IDLE_DOMAIN_ID )
{
inf->avt = inf->evt = ~0U;
return;
}
+int fbvt_init_idle_task(struct domain *p)
+{
+ unsigned long flags;
+
+ if(fbvt_alloc_task(p) < 0) return -1;
+
+ fbvt_add_task(p);
+//printk("< ----- >Initialising idle task for processor %d, address %d, priv %d\n", p->processor, (int)p, (int)p->sched_priv);
+ spin_lock_irqsave(&schedule_lock[p->processor], flags);
+ set_bit(DF_RUNNING, &p->flags);
+ if ( !__task_on_runqueue(RUNLIST(p)) )
+ __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+
+ return 0;
+}
+
+
/**
* fbvt_free_task - free FBVT private structures for a task
* @p: task
struct fbvt_dom_info *next_prime_inf = NULL;
task_slice_t ret;
+//if(prev->sched_priv == NULL) printk("----> %d\n", prev->domain);
ASSERT(prev->sched_priv != NULL);
ASSERT(prev_inf != NULL);
__calc_evt(prev_inf);
- __del_from_runqueue(prev);
+ __del_from_runqueue(RUNLIST(prev));
if ( domain_runnable(prev) )
- __add_to_runqueue_tail(prev);
+ __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
}
/* We should at least have the idle task */
- ASSERT(!list_empty(&schedule_data[cpu].runqueue));
+ ASSERT(!list_empty(RUNQUEUE(cpu)));
/*
* scan through the run queue and pick the task with the lowest evt
* *and* the task the second lowest evt.
* this code is O(n) but we expect n to be small.
*/
- next = schedule_data[cpu].idle;
- next_prime = NULL;
+ next_inf = FBVT_INFO(schedule_data[cpu].idle);
+ next_prime_inf = NULL;
next_evt = ~0U;
next_prime_evt = ~0U;
min_avt = ~0U;
- list_for_each ( tmp, &schedule_data[cpu].runqueue )
+ list_for_each ( tmp, RUNQUEUE(cpu) )
{
- p = list_entry(tmp, struct domain, run_list);
- p_inf = FBVT_INFO(p);
+ p_inf = list_entry(tmp, struct fbvt_dom_info, run_list);
if ( p_inf->evt < next_evt )
{
- next_prime = next;
- next_prime_evt = next_evt;
- next = p;
- next_evt = p_inf->evt;
- }
+ next_prime_inf = next_inf;
+ next_prime_evt = next_evt;
+ next_inf = p_inf;
+ next_evt = p_inf->evt;
+ }
else if ( next_prime_evt == ~0U )
{
- next_prime_evt = p_inf->evt;
- next_prime = p;
- }
+ next_prime_evt = p_inf->evt;
+ next_prime_inf = p_inf;
+ }
else if ( p_inf->evt < next_prime_evt )
{
- next_prime_evt = p_inf->evt;
- next_prime = p;
+ next_prime_evt = p_inf->evt;
+ next_prime_inf = p_inf;
}
/* Determine system virtual time. */
min_avt = p_inf->avt;
}
+ /* Extract the domain pointers from the dom infos */
+ next = next_inf->domain;
+ next_prime = next_prime_inf->domain;
+
+
/* Update system virtual time. */
if ( min_avt != ~0U )
CPU_SVT(cpu) = min_avt;
CPU_SVT(cpu) -= 0xe0000000;
}
- next_prime_inf = FBVT_INFO(next_prime);
- next_inf = FBVT_INFO(next);
-
/* check for time_slept overrun for the domain we schedule to run*/
if(next_inf->time_slept >= 0xf0000000)
{
next->min_slice = ctx_allow;
ret.task = next;
ret.time = r_time;
-
+//printk("NEXT --> domain %d (address %d, processor %d), priv %d\n",next->domain, (int)next, next->processor, (int)next->sched_priv);
return ret;
}
/* Print one domain's FBVT parameters (weight inverse, evt, avt). */
static void fbvt_dump_runq_el(struct domain *p)
{
    struct fbvt_dom_info *dinf = FBVT_INFO(p);

    printk("mcua=0x%04lX ev=0x%08X av=0x%08X ",
           dinf->mcu_advance, dinf->evt, dinf->avt);
}
/* Print global FBVT settings. Label restored to "FBVT:" — the patched
 * version said "BVT:", a copy-paste slip from the BVT scheduler. */
static void fbvt_dump_settings(void)
{
    printk("FBVT: mcu=0x%08Xns ctx_allow=0x%08Xns ", (u32)MCU, (s32)ctx_allow );
}
static void fbvt_dump_cpu_state(int i)
{
+ unsigned long flags;
+ struct list_head *list, *queue;
+ int loop = 0;
+ struct fbvt_dom_info *d_inf;
+ struct domain *d;
+
+ spin_lock_irqsave(&schedule_lock[i], flags);
printk("svt=0x%08lX ", CPU_SVT(i));
+
+ queue = RUNQUEUE(i);
+ printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
+ (unsigned long) queue->next, (unsigned long) queue->prev);
+
+ list_for_each ( list, queue )
+ {
+ d_inf = list_entry(list, struct fbvt_dom_info, run_list);
+ d = d_inf->domain;
+ printk("%3d: %u has=%c ", loop++, d->domain,
+ test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+ fbvt_dump_runq_el(d);
+ printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+ printk(" l: %lx n: %lx p: %lx\n",
+ (unsigned long)list, (unsigned long)list->next,
+ (unsigned long)list->prev);
+ }
+ spin_unlock_irqrestore(&schedule_lock[i], flags);
+}
+
+
+/* We use cache to create the bvt_dom_infos
+ this functions makes sure that the run_list
+ is initialised properly. The new domain needs
+ NOT to appear as to be on the runqueue */
+static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+{
+ struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
+ dom_inf->run_list.next = NULL;
+ dom_inf->run_list.prev = NULL;
}
+
/* Initialise the data structures. */
int fbvt_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
{
schedule_data[i].sched_priv = kmalloc(sizeof(struct fbvt_cpu_info));
+ INIT_LIST_HEAD(RUNQUEUE(i));
if ( schedule_data[i].sched_priv == NULL )
{
printk("Failed to allocate FBVT scheduler per-CPU memory!\n");
dom_info_cache = kmem_cache_create("FBVT dom info",
sizeof(struct fbvt_dom_info),
- 0, 0, NULL, NULL);
+ 0, 0, cache_constructor, NULL);
if ( dom_info_cache == NULL )
{
{
if ( test_bit(DF_RUNNING, &d->flags) )
cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
- else if ( __task_on_runqueue(d) )
- __del_from_runqueue(d);
+ else if ( __task_on_runqueue(RUNLIST(d)) )
+ __del_from_runqueue(RUNLIST(d));
}
static void fbvt_wake(struct domain *d)
int cpu = d->processor;
s32 io_warp;
+//printk("-|--> Adding new domain %d\n",d->domain);
+//printk("-|--> Current%d (address %d, processor %d) %d\n",current->domain,(int)current, current->processor, (int)current->sched_priv);
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(d)) )
+ if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
return;
-
- __add_to_runqueue_head(d);
-
+//printk("----> Not on runqueue\n");
+ __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
+//printk(" ---> %d\n",(int)current->sched_priv);
+
now = NOW();
#if 0
__calc_evt(inf);
curr = schedule_data[cpu].curr;
-
+//printk(" ---> %d\n",(int)current->sched_priv);
+
/* Currently-running domain should run at least for ctx_allow. */
min_time = curr->lastschd + curr->min_slice;
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
-}
+//printk(" ---> %d\n",(int)current->sched_priv);
+}
struct scheduler sched_fbvt_def = {
.name = "Fair Borrowed Virtual Time",
.sched_id = SCHED_FBVT,
.init_scheduler = fbvt_init_scheduler,
+ .init_idle_task = fbvt_init_idle_task,
.alloc_task = fbvt_alloc_task,
.add_task = fbvt_add_task,
.free_task = fbvt_free_task,
.adjdom = fbvt_adjdom,
.dump_settings = fbvt_dump_settings,
.dump_cpu_state = fbvt_dump_cpu_state,
- .dump_runq_el = fbvt_dump_runq_el,
.sleep = fbvt_sleep,
.wake = fbvt_wake,
};
#include <xen/ac_timer.h>
#include <xen/softirq.h>
#include <xen/time.h>
+#include <xen/slab.h>
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */
static s_time_t rr_slice = MILLISECS(10);
+/* Only runqueue pointers and domain pointer*/
+struct rrobin_dom_info
+{
+ struct list_head run_list;
+ struct domain *domain;
+};
+
+#define RR_INFO(d) ((struct rrobin_dom_info *)d->sched_priv)
+#define RUNLIST(d) (struct list_head *)&(RR_INFO(d)->run_list)
+#define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
+
+// TODO remove following line
+static void rr_dump_cpu_state(int cpu);
+
+/* SLAB cache for struct rrobin_dom_info objects */
+static kmem_cache_t *dom_info_cache;
+
+
+/* Ensures proper initialisation of the dom_info */
+static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+{
+ struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
+ dom_inf->run_list.next = NULL;
+ dom_inf->run_list.prev = NULL;
+}
+
+
+/* Initialises the runqueues and creates the domain info cache */
+static int rr_init_scheduler()
+{
+ int i;
+
+ for ( i = 0; i < NR_CPUS; i++ )
+ INIT_LIST_HEAD(RUNQUEUE(i));
+
+ dom_info_cache = kmem_cache_create("FBVT dom info",
+ sizeof(struct rrobin_dom_info),
+ 0, 0, cache_constructor, NULL);
+
+ if(dom_info_cache == NULL)
+ {
+ printk("Could not allocate SLAB cache.\n");
+ return -1;
+ }
+ return 0;
+}
+
+/* Allocates memory for per domain private scheduling data*/
+static int rr_alloc_task(struct domain *d)
+{
+ d->sched_priv = kmem_cache_alloc(dom_info_cache);
+ if ( d->sched_priv == NULL )
+ return -1;
+
+ return 0;
+}
+
+/* Setup the rr_dom_info */
+static void rr_add_task(struct domain *p)
+{
+ struct rrobin_dom_info *inf;
+ RR_INFO(p)->domain = p;
+ inf = RR_INFO(p);
+}
+
+/* Frees memory used by domain info */
+static void rr_free_task(struct domain *p)
+{
+ ASSERT( p->sched_priv != NULL );
+ kmem_cache_free( dom_info_cache, p->sched_priv );
+}
+
+/* Initialises idle task */
+static int rr_init_idle_task(struct domain *p)
+{
+ unsigned long flags;
+ if(rr_alloc_task(p) < 0) return -1;
+ rr_add_task(p);
+
+ spin_lock_irqsave(&schedule_lock[p->processor], flags);
+ set_bit(DF_RUNNING, &p->flags);
+ if ( !__task_on_runqueue(RUNLIST(p)) )
+ __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+ return 0;
+}
+
+
+/* Main scheduling function */
static task_slice_t rr_do_schedule(s_time_t now)
{
struct domain *prev = current;
int cpu = current->processor;
+
task_slice_t ret;
-
- __del_from_runqueue(prev);
+
+ if(!is_idle_task(prev))
+ {
+ __del_from_runqueue(RUNLIST(prev));
- if ( domain_runnable(prev) )
- __add_to_runqueue_tail(prev);
+ if ( domain_runnable(prev) )
+ __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
+ }
- ret.task = list_entry(schedule_data[cpu].runqueue.next,
- struct domain, run_list);
-
+ ret.task = list_entry( RUNQUEUE(cpu).next->next,
+ struct rrobin_dom_info,
+ run_list)->domain;
ret.time = rr_slice;
-
return ret;
}
+/* Set/retrive control parameter(s) */
static int rr_ctl(struct sched_ctl_cmd *cmd)
{
if ( cmd->direction == SCHED_INFO_PUT )
{
if ( test_bit(DF_RUNNING, &d->flags) )
cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
- else if ( __task_on_runqueue(d) )
- __del_from_runqueue(d);
+ else if ( __task_on_runqueue(RUNLIST(d)) )
+ __del_from_runqueue(RUNLIST(d));
}
void rr_wake(struct domain *d)
int cpu = d->processor;
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(d)) )
+ if ( unlikely(__task_on_runqueue(RUNLIST(d))))
return;
- __add_to_runqueue_head(d);
-
+ __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
now = NOW();
curr = schedule_data[cpu].curr;
mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
}
+
+static void rr_dump_domain(struct domain *d)
+{
+ printk("%u has=%c ", d->domain,
+ test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+ printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+}
+
+static void rr_dump_cpu_state(int i)
+{
+ unsigned long flags;
+ struct list_head *list, *queue;
+ int loop = 0;
+ struct rrobin_dom_info *d_inf;
+
+ spin_lock_irqsave(&schedule_lock[i], flags);
+
+ queue = RUNQUEUE(i);
+ printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
+ (unsigned long) queue->next, (unsigned long) queue->prev);
+
+ printk("%3d: ",loop++);
+ d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
+ rr_dump_domain(d_inf->domain);
+
+ list_for_each ( list, queue )
+ {
+ printk("%3d: ",loop++);
+ d_inf = list_entry(list, struct rrobin_dom_info, run_list);
+ rr_dump_domain(d_inf->domain);
+ }
+ spin_unlock_irqrestore(&schedule_lock[i], flags);
+}
+
+
struct scheduler sched_rrobin_def = {
.name = "Round-Robin Scheduler",
.opt_name = "rrobin",
.sched_id = SCHED_RROBIN,
-
+
+ .init_idle_task = rr_init_idle_task,
+ .alloc_task = rr_alloc_task,
+ .add_task = rr_add_task,
+ .free_task = rr_free_task,
+ .init_scheduler = rr_init_scheduler,
.do_schedule = rr_do_schedule,
.control = rr_ctl,
.dump_settings = rr_dump_settings,
+ .dump_cpu_state = rr_dump_cpu_state,
.sleep = rr_sleep,
.wake = rr_wake,
};
/* Generic entry point: delegate idle-task setup to the active scheduler. */
void init_idle_task(void)
{
    struct domain *d = current;

    if ( SCHED_OP(init_idle_task, d) < 0)
        panic("Failed to initialise idle task for processor %d",d->processor);
}
void domain_sleep(struct domain *d)
{
unsigned long flags;
int cpu = d->processor;
-
spin_lock_irqsave(&schedule_lock[cpu], flags);
if ( likely(domain_runnable(d)) )
{
rem_ac_timer(&schedule_data[cpu].s_timer);
ASSERT(!in_irq());
- ASSERT(__task_on_runqueue(prev));
+ // TODO - move to specific scheduler ASSERT(__task_on_runqueue(prev));
if ( test_bit(DF_BLOCKED, &prev->flags) )
{
for ( i = 0; i < NR_CPUS; i++ )
{
- INIT_LIST_HEAD(&schedule_data[i].runqueue);
spin_lock_init(&schedule_lock[i]);
schedule_data[i].curr = &idle0_task;
}
-static void dump_rqueue(struct list_head *queue, char *name)
-{
- struct list_head *list;
- int loop = 0;
- struct domain *d;
-
- printk("QUEUE %s %lx n: %lx, p: %lx\n", name, (unsigned long)queue,
- (unsigned long) queue->next, (unsigned long) queue->prev);
-
- list_for_each ( list, queue )
- {
- d = list_entry(list, struct domain, run_list);
- printk("%3d: %u has=%c ", loop++, d->domain,
- test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
- SCHED_OP(dump_runq_el, d);
- printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
- printk(" l: %lx n: %lx p: %lx\n",
- (unsigned long)list, (unsigned long)list->next,
- (unsigned long)list->prev);
- }
-}
-
/* Keyhandler: ask each online CPU's scheduler to dump its runqueue state.
 * Locking is now done inside the per-scheduler dump_cpu_state hooks. */
void dump_runq(u_char key, void *dev_id, struct pt_regs *regs)
{
    s_time_t now = NOW();
    int i;

    printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);
    for ( i = 0; i < smp_num_cpus; i++ )
    {
        printk("CPU[%02d] ", i);
        SCHED_OP(dump_cpu_state,i);
    }
}
typedef struct schedule_data_st
{
- struct list_head runqueue; /* runqueue */
struct domain *curr; /* current task */
struct domain *idle; /* idle task for this cpu */
void * sched_priv;
unsigned int sched_id; /* ID for this scheduler */
int (*init_scheduler) ();
+ int (*init_idle_task) (struct domain *);
int (*alloc_task) (struct domain *);
void (*add_task) (struct domain *);
void (*free_task) (struct domain *);
struct sched_adjdom_cmd *);
void (*dump_settings) (void);
void (*dump_cpu_state) (int);
- void (*dump_runq_el) (struct domain *);
int (*prn_state) (int);
};
* Wrappers for run-queue management. Must be called with the schedule_lock
* held.
*/
-static inline void __add_to_runqueue_head(struct domain * p)
-{
- list_add(&p->run_list, &schedule_data[p->processor].runqueue);
-}
-
-static inline void __add_to_runqueue_tail(struct domain * p)
+static inline void __add_to_runqueue_head(struct list_head *run_list, struct list_head *runqueue)
{
- list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
+ list_add(run_list, runqueue);
}
-static inline void __del_from_runqueue(struct domain * p)
+static inline void __add_to_runqueue_tail(struct list_head *run_list, struct list_head *runqueue)
{
- list_del(&p->run_list);
- p->run_list.next = NULL;
+ list_add_tail(run_list, runqueue);
}
-static inline int __task_on_runqueue(struct domain *p)
+static inline void __del_from_runqueue(struct list_head *run_list)
{
- return p->run_list.next != NULL;
+ list_del(run_list);
+ run_list->next = NULL;
}
-#define next_domain(p) \\
- list_entry((p)->run_list.next, struct domain, run_list)
-
-
-static inline int __runqueue_empty(int cpu)
+static inline int __task_on_runqueue(struct list_head *run_list)
{
- return list_empty(&schedule_data[cpu].runqueue);
+ return run_list->next != NULL;
}
+
unsigned int xenheap_pages; /* # pages allocated from Xen heap */
/* Scheduling. */
- struct list_head run_list;
int shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
s_time_t lastschd; /* time this domain was last scheduled */
s_time_t lastdeschd; /* time this domain was last descheduled */